
@InProceedings{CavallariPont:2021:SeSiNe,
               author = "Cavallari, Gabriel and Ponti, Moacir",
          affiliation = "{Universidade de S{\~a}o Paulo} and {Universidade de S{\~a}o 
                         Paulo}",
                title = "Semi-supervised {Siamese} network using self-supervision under 
                         scarce annotation improves class separability and robustness to 
                         attack",
            booktitle = "Proceedings...",
                 year = "2021",
               editor = "Paiva, Afonso and Menotti, David and Baranoski, Gladimir V. G. and 
                         Proen{\c{c}}a, Hugo Pedro and Junior, Antonio Lopes Apolinario 
                         and Papa, Jo{\~a}o Paulo and Pagliosa, Paulo and dos Santos, 
                         Thiago Oliveira and e S{\'a}, Asla Medeiros and da Silveira, 
                         Thiago Lopes Trugillo and Brazil, Emilio Vital and Ponti, Moacir 
                         A. and Fernandes, Leandro A. F. and Avila, Sandra",
         organization = "Conference on Graphics, Patterns and Images, 34. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "deep learning, attack, self-supervision, self-supervised 
                         learning.",
             abstract = "Self-supervised learning approaches were shown to benefit feature 
                         learning by training models under a pretext task. In this context, 
                         learning from limited data can be tackled using a combination of 
                         semi-supervised learning and self-supervision. In this paper we 
                         combine the traditional supervised learning paradigm with the 
                         rotation prediction self-supervised task, that are used 
                         simultaneously to train a siamese model with a joint loss function 
                         and shared weights. In particular, we are interested in the case 
                         in which the proportion of labeled with respect to unlabeled data 
                         is small. We investigate the effectiveness of a compact feature 
                         space obtained after training under such limited annotation 
                         scenario, in terms of linear class separability and under attack. 
                         The study includes images from multiple domains, such as natural 
                         images (STL-10 dataset), products (Fashion-MNIST dataset) and 
                         biomedical images (Malaria dataset). We show that in scenarios 
                         where we have only a few labeled data the model augmented with a 
                         self-supervised task can take advantage of the unlabeled data to 
                         improve the learned representation in terms of the linear 
                         discrimination, as well as allowing learning even under attack. 
                         Also, we discuss the choices in terms of self-supervision and 
                         cases of failure considering the different datasets.",
  conference-location = "Gramado, RS, Brazil (virtual)",
      conference-year = "18-22 Oct. 2021",
                  doi = "10.1109/SIBGRAPI54419.2021.00038",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/45CUEK8",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/45CUEK8",
           targetfile = "81.pdf",
        urlaccessdate = "2024, May 06"
}

